func runtime.alignUp

36 uses

	runtime (current package)
		cgocall.go#L468: 		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))
		malloc.go#L601: 		p = alignUp(p+(256<<10), heapArenaBytes)
		malloc.go#L653: 	n = alignUp(n, heapArenaBytes)
		malloc.go#L857: 		p = alignUp(p, align)
		malloc.go#L871: 		pAligned := alignUp(p, align)
		malloc.go#L1147: 		off = alignUp(off, 8)
		malloc.go#L1155: 		off = alignUp(off, 8)
		malloc.go#L1157: 		off = alignUp(off, 4)
		malloc.go#L1159: 		off = alignUp(off, 2)
		malloc.go#L1950: 	persistent.off = alignUp(persistent.off, align)
		malloc.go#L1968: 		persistent.off = alignUp(goarch.PtrSize, align)
		malloc.go#L2027: 	p := alignUp(l.next, align)
		malloc.go#L2032: 	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		mbitmap.go#L760: 	off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
		mem_linux.go#L100: 		beg := alignUp(uintptr(v), physHugePageSize)
		mfinal.go#L526: 		nret = alignUp(nret, uintptr(t.Align_)) + t.Size_
		mfinal.go#L528: 	nret = alignUp(nret, goarch.PtrSize)
		mgcscavenge.go#L910: 		max = alignUp(max, minimum)
		mgcscavenge.go#L970: 		hugePageAbove := uint(alignUp(uintptr(start), pagesPerHugePage))
		mheap.go#L1243: 		base = alignUp(base, physPageSize)
		mheap.go#L1488: 	ask := alignUp(npage, pallocChunkPages) * pageSize
		mheap.go#L1494: 	nBase := alignUp(end, physPageSize)
		mheap.go#L1537: 		nBase = alignUp(h.curArena.base+ask, physPageSize)
		mpagealloc.go#L177: 	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
		mpagealloc.go#L362: 	limit := alignUp(base+size, pallocChunkBytes)
		mpagealloc_64bit.go#L78: 		b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
		mpagealloc_64bit.go#L119: 		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
		mpagealloc_64bit.go#L211: 	needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
		mranges.go#L77: 	base := alignUp(a.base.addr(), uintptr(align)) + len
		pinner.go#L302: 	bytes := alignUp(s.pinnerBitSize(), 8)
		proc.go#L5078: 	totalSize = alignUp(totalSize, sys.StackAlign)
		stack.go#L352: 		n = uint32(alignUp(uintptr(n), physPageSize))
		stkframe.go#L281: 		off:       -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local.
		stubs.go#L372: func alignUp(n, a uintptr) uintptr {
		traceback.go#L503: 		frame.sp += alignUp(sys.MinFrameSize, sys.StackAlign)
		traceregion.go#L44: 	n = alignUp(n, 8)
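
All of the call sites above round a size or address up to an alignment boundary (pointer size, page size, arena size, stack alignment). Based on the declaration listed under stubs.go, `alignUp(n, a uintptr) uintptr` returns n rounded up to the next multiple of a; a minimal sketch of the rounding logic, assuming a is a power of two as it is at every call site shown (the exact runtime source may differ in comments or asserts):

	// alignUp rounds n up to a multiple of a.
	// a must be a power of 2 for the bit trick to be valid.
	func alignUp(n, a uintptr) uintptr {
		return (n + a - 1) &^ (a - 1)
	}

	// alignDown, used alongside alignUp in mpagealloc.go above,
	// would round n down instead: n &^ (a - 1).

For example, alignUp(13, 8) == 16 and alignUp(16, 8) == 16: adding a-1 and then clearing the low bits with &^ (AND NOT) lands on the next multiple of a, which is why a power-of-two alignment is required.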